This patch fixes the VTI domain destruction processing.
I tested domU creation and destruction with both VTI and
non-VTI domains. There was no memory leak, and the VTI domain
is no longer left in a zombie state.
Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
return tlb;
}
+/*
+ * Free the per-vcpu VHPT region of a VTI (VMX) domain.
+ * The thash_cb_t control block lives at the very end of the VHPT
+ * allocation (its end is tlb + sizeof(thash_cb_t)), so the region
+ * base is recovered by stepping back VCPU_VHPT_SIZE from that end.
+ * No-op when the vcpu has no vtlb (non-VTI domain).
+ * NOTE(review): v->arch.vtlb is not cleared after the free — a
+ * second call would double-free; confirm callers invoke this once.
+ */
+void free_domain_tlb(struct vcpu *v)
+{
+ struct page_info *page;
+ void *vhptbase;
+ thash_cb_t *tlb;
+
+ if ( v->arch.vtlb ) {
+ tlb = v->arch.vtlb;
+ /* end of region = tlb + sizeof(thash_cb_t); base = end - size */
+ vhptbase = (void*)((u64)tlb + sizeof (thash_cb_t)) - VCPU_VHPT_SIZE;
+ page = virt_to_page(vhptbase);
+ free_domheap_pages(page, VCPU_VHPT_ORDER);
+ }
+}
+
/*
* Insert guest TLB to machine TLB.
* data: In TLB format
return vpd;
}
+/*
+ * Free the vcpu's VPD (privregs) pages back to the xenheap.
+ * Safe to call when privregs was never allocated (NULL pointer).
+ * NOTE(review): privregs is not reset to NULL afterwards — verify
+ * this path is reached only once during vcpu teardown.
+ */
+static void
+free_vpd(struct vcpu *v)
+{
+ if ( v->arch.privregs )
+ free_xenheap_pages(v->arch.privregs, get_order(VPD_SIZE));
+}
/*
* Create a VP on intialized VMX environment.
{
vpd_t *vpd;
+ free_xenheap_pages(v->arch.privregs, get_order(sizeof(mapped_regs_t)));
+
vpd = alloc_vpd();
ASSERT(vpd);
set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
}
+/*
+ * Release all VMX-specific resources held by a vcpu at destruction:
+ * stop the virtual timer, then free the VHPT region and the VPD.
+ * Intended to be called once per vcpu during domain teardown.
+ */
+void
+vmx_relinquish_vcpu_resources(struct vcpu *v)
+{
+ vtime_t *vtm = &(v->arch.arch_vmx.vtm);
+
+ /* Stop the vtm timer before its memory goes away. */
+ kill_timer(&vtm->vtm_timer);
+
+ free_domain_tlb(v);
+ free_vpd(v);
+}
+
typedef struct io_range {
unsigned long start;
unsigned long size;
*/
if ( (op->u.getmemlist.max_pfns == -1UL) &&
!test_bit(ARCH_VMX_CONTIG_MEM,
- &d->vcpu[0]->arch.arch_vmx.flags) )
- return vmx_alloc_contig_pages(d) ? (-ENOMEM) : 0;
+ &d->vcpu[0]->arch.arch_vmx.flags) ) {
+ ret = (long) vmx_alloc_contig_pages(d);
+ put_domain(d);
+ return ret ? (-ENOMEM) : 0;
+ }
for ( i = start_page; i < (start_page + nr_pages); i++ )
{
void free_vcpu_struct(struct vcpu *v)
{
+ /* VMX (VTI) vcpus carry extra state (timer, VHPT, VPD) that must
+ * be torn down as a unit; plain vcpus only free their privregs. */
- if (v->arch.privregs != NULL)
- free_xenheap_pages(v->arch.privregs, get_order(sizeof(mapped_regs_t)));
+ if (VMX_DOMAIN(v))
+ vmx_relinquish_vcpu_resources(v);
+ else {
+ if (v->arch.privregs != NULL)
+ free_xenheap_pages(v->arch.privregs, get_order(sizeof(mapped_regs_t)));
+ }
+
+ /* Finally release the vcpu struct / kernel stack itself. */
free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
}
extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va);
extern thash_cb_t *init_domain_tlb(struct vcpu *d);
+extern void free_domain_tlb(struct vcpu *v);
extern thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag);
extern thash_data_t * vhpt_lookup(u64 va);
extern void machine_tlb_purge(u64 va, u64 ps);
extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
extern void vmx_intr_assist(struct vcpu *v);
extern void set_illegal_op_isr (struct vcpu *vcpu);
-extern void illegal_op (struct vcpu *vcpu);
+extern void illegal_op (struct vcpu *vcpu);
+extern void vmx_relinquish_vcpu_resources(struct vcpu *v);
+
static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
{
return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];